Working with Text Corpora


In [2]:
import os 
import bs4
import json
import nltk
import time 
import codecs
import pickle 
import shutil

from six import string_types

from nltk.tokenize import WordPunctTokenizer
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.api import CategorizedCorpusReader
from readability.readability import Document as Paper

In [3]:
# Module Variables
ROOT     = os.getcwd() 
CORPUS   = os.path.join(ROOT, "fixtures", "corpus")
GALACTIC = os.path.join(ROOT, "fixtures", "galactic")
TAGGED   = os.path.join(ROOT, "fixtures", "preprocessed")

Plaintext Corpora


In [4]:
DOC_PATTERN = r'(?!\.)[\w_\s]+/[\w\s\d\-]+\.txt'
CAT_PATTERN = r'([\w_\s]+)/.*'

corpus = nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader(
    GALACTIC, DOC_PATTERN, cat_pattern=CAT_PATTERN
)
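
As a quick sanity check (a minimal sketch, not part of the original notebook), the category pattern can be applied by hand to one of the fileids shown later in this section to confirm that the leading directory is what gets captured as the category:

import re

# Apply CAT_PATTERN to a sample fileid; the first capture group is the
# category name, i.e. the directory the file lives in.
example = 'Star Wars/Star Wars Episode 4.txt'
print(re.match(CAT_PATTERN, example).group(1))   # -> Star Wars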

In [24]:
# What properties do we get?
for prop in dir(corpus):
    if not prop.startswith('_'): print(prop)


CorpusView
abspath
abspaths
categories
citation
encoding
ensure_loaded
fileids
license
open
paras
raw
readme
root
sents
unicode_repr
words
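
Most of these follow the standard NLTK corpus reader API; for instance, raw() returns the unprocessed text of a file. A minimal sketch (assuming the galactic fixtures above are on disk):

# Preview the first 200 characters of one script rather than dumping it all.
print(corpus.raw(fileids='Star Trek/Star Trek.txt')[:200])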

In [5]:
# Print out the categories 
for cat in corpus.categories(): print(cat)


Star Trek
Star Wars

In [13]:
# Print out the fileids
for fid in corpus.fileids(): print(fid)


Star Trek/Star Trek - Balance of Terror.txt
Star Trek/Star Trek - First Contact.txt
Star Trek/Star Trek - Generations.txt
Star Trek/Star Trek - Nemesis.txt
Star Trek/Star Trek - The Motion Picture.txt
Star Trek/Star Trek 2 - The Wrath of Khan.txt
Star Trek/Star Trek.txt
Star Wars/Star Wars Episode 1.txt
Star Wars/Star Wars Episode 2.txt
Star Wars/Star Wars Episode 3.txt
Star Wars/Star Wars Episode 4.txt
Star Wars/Star Wars Episode 5.txt
Star Wars/Star Wars Episode 6.txt
Star Wars/Star Wars Episode 7.txt

In [15]:
# View the README
print(corpus.readme())


# Galactic Classifier

Can you spot the difference in language between Star Wars and Star Trek?

Scripts obtained from:

- http://www.imsdb.com/alphabetical/S
- http://www.chakoteya.net/StarTrek/9.htm


In [16]:
# View the LICENSE
print(corpus.license())


Copyright (c) by Lucas Arts and Paramount Pictures. 


In [17]:
# View the CITATION
print(corpus.citation())


@misc{ddl_galactic_2016,
  title = {Galactic {{Corpus}}},
  timestamp = {2016-04-19T17:16:23Z},
  publisher = {{District Data Labs}},
  author = {Voorhees, Will and Bengfort, Benjamin},
  month = apr,
  year = {2016}
}


In [21]:
# View the first 14 sentences in Star Trek
for idx, sent in enumerate(corpus.sents(fileids='Star Trek/Star Trek.txt')):
    print(sent)
    if idx > 12: break


['STAR', 'TREK']
['Written', 'by']
['Roberto', 'Orci', '&', 'Alex', 'Kurtzman']
['November', ',', '2007']
['1', '-', '13', 'OMIT', '1', '-', '13']
['14', 'OMIT', '14']
['15', 'OVER', 'BLACKNESS', ',', 'we', 'HEAR', 'a', 'BACH', 'HARPSICHORD', 'CONCERTO', '.']
['And', 'then', 'a', '15', 'WOMAN', '--', 'breathing', 'hard', '--', 'straining', ',', 'harder', 'and', 'harder', '--', 'until', 'finally', 'we', 'HEAR', 'a', 'NEWBORN', 'BABY', 'CRYING', '--', 'and', 'we', '...']
['FADE', 'IN', ':']
['EXT', '.']
['VULCAN', 'FAMILY', 'HOME', '-', 'DUSK']
['The', 'image', 'is', 'spectacular', ',', 'aglow', 'in', 'DUSK', 'LIGHT', ':', 'a', 'beautiful', 'BABY', ',', 'just', 'born', ',', 'held', 'in', 'a', 'WOMAN', "'", 'S', 'HANDS', '.']
['It', 'is', 'being', 'cleaned', ';', 'warm', 'water', 'runs', 'down', 'its', 'face', 'and', 'body', '.']
['TIGHT', 'ON', 'the', 'MIDWIFE', '(', 'female', ',', '60', "'", 's', ')', 'who', 'holds', 'and', 'cleans', 'the', 'baby', 'as', 'it', 'CRIES', '.']
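
The reader also exposes paras() and words(); a minimal sketch (again assuming the fixtures are available) of streaming the first few tokens from a single category:

from itertools import islice

# words() accepts a categories argument, so we can pull tokens from the
# Star Wars scripts only, and lazily at that.
for token in islice(corpus.words(categories=['Star Wars']), 10):
    print(token)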

Baleen Corpus


In [42]:
# Corpus Patterns 
DOC_PATTERN = r'(?!\.)[a-z_\s]+/[a-f0-9]+\.json'
CAT_PATTERN = r'([a-z_\s]+)/.*'


class BaleenCorpusReader(CategorizedCorpusReader, CorpusReader):
    """
    A corpus reader for the raw JSON Baleen documents that have not been
    preprocessed and have the complete feed information exported from Mongo.
    """

    # Tags to extract as paragraphs from the HTML text
    TAGS = [
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'li'
    ]

    def __init__(self, root, fileids=DOC_PATTERN, tags=None,
                 word_tokenizer=WordPunctTokenizer(),
                 sent_tokenizer=nltk.data.LazyLoader(
                    'tokenizers/punkt/english.pickle'),
                 encoding='utf8', **kwargs):
        """
        Initialize the corpus reader.  Categorization arguments
        (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
        the ``CategorizedCorpusReader`` constructor.  The remaining arguments
        are passed to the ``CorpusReader`` constructor.
        """
        # Add the default category pattern if not passed into the class.
        if not any(key.startswith('cat_') for key in kwargs.keys()):
            kwargs['cat_pattern'] = CAT_PATTERN

        CategorizedCorpusReader.__init__(self, kwargs)
        CorpusReader.__init__(self, root, fileids, encoding)

        self._word_tokenizer = word_tokenizer
        self._sent_tokenizer = sent_tokenizer
        self._good_tags = tags or self.TAGS

    def feeds(self):
        """
        Opens and returns the collection of feeds associated with the corpus.
        """
        data = self.open('feeds.json')
        return json.load(data)

    def _resolve(self, fileids, categories):
        """
        Returns a list of fileids or categories depending on what is passed
        to each internal corpus reader function. This primarily bubbles up to
        the high level ``docs`` method, but is implemented here similarly to
        the nltk ``CategorizedPlaintextCorpusReader``.
        """
        if fileids is not None and categories is not None:
            raise ValueError("Specify fileids or categories, not both")

        if categories is not None:
            return self.fileids(categories)
        return fileids

    def docs(self, fileids=None, categories=None):
        """
        Returns the complete JSON document for every file in the corpus.
        Note that I attempted to use the nltk ``CorpusView`` and ``concat``
        methods here, but was not getting memory-safe iteration. Instead, a
        simple Python generator did a far better job of ensuring that file
        handles were closed and that not all data was loaded into memory at
        once. In the future, I will try to re-implement the corpus view.
        """
        # Resolve the fileids and the categories
        fileids = self._resolve(fileids, categories)

        # Create a generator, loading one document into memory at a time.
        for path, enc, fileid in self.abspaths(fileids, True, True):
            with codecs.open(path, 'r', encoding=enc) as f:
                yield json.load(f)

    def fields(self, fields, fileids=None, categories=None):
        """
        Helper function to extract particular fields from the json documents.
        Fields can be a string or an iterable of fields. If just one field is
        passed in, then the values are returned, otherwise dictionaries of
        the requested fields are returned.

        When a single field is requested, documents that do not contain it
        are skipped; when multiple fields are requested, any missing field is
        filled in with None.

        For example to get title and pubdate from the documents:

            corpus.fields(['title', 'pubdate'])

        Or to simply get all of the summaries:

            corpus.fields('summary')

        Note: there is not yet support for nested fields.
        """
        if isinstance(fields, string_types):
            fields = [fields,]

        if len(fields) == 1:
            for doc in self.docs(fileids, categories):
                if fields[0] in doc:
                    yield doc[fields[0]]

        else:
            for doc in self.docs(fileids, categories):
                yield {
                    key: doc.get(key, None)
                    for key in fields
                }

    def html(self, fileids=None, categories=None, readability=True):
        """
        Returns the HTML content from each JSON document for every file in
        the corpus, ensuring that it exists. Note, this simply returns the
        HTML strings, it doesn't perform any parsing of the HTML.

        If readability is True, clean HTML is returned.
        """
        ## Returns a generator of documents.
        html = self.fields('content', fileids, categories)
        if readability:
            for doc in html:
                try:
                    yield Paper(doc).summary()
                except Exception:
                    # Skip documents that readability cannot parse.
                    continue
        else:
            for doc in html:
                yield doc

    def paras(self, fileids=None, categories=None):
        """
        Uses BeautifulSoup to parse the paragraphs from the HTML.
        Currently, this just yields raw paragraph text; it does not do any
        segmentation or tokenization as the standard NLTK CorpusReader
        objects do.
        """
        for html in self.html(fileids, categories):
            soup = bs4.BeautifulSoup(html, 'lxml')
            for element in soup.find_all(self._good_tags):
                yield element.text

    def sents(self, fileids=None, categories=None):
        """
        Uses the built in sentence tokenizer to extract sentences from the
        paragraphs. Note that this method uses BeautifulSoup to parse HTML.
        """
        for paragraph in self.paras(fileids, categories):
            for sentence in self._sent_tokenizer.tokenize(paragraph):
                yield sentence

    def words(self, fileids=None, categories=None):
        """
        Uses the built in word tokenizer to extract tokens from sentences.
        Note that this method uses BeautifulSoup to parse HTML content.
        """
        for sentence in self.sents(fileids, categories):
            for token in self._word_tokenizer.tokenize(sentence):
                yield token

In [43]:
# Load the Baleen Corpus Reader
corpus = BaleenCorpusReader(CORPUS)
print(
    "Baleen corpus contains {} files in {} categories".format(
        len(corpus.fileids()), len(corpus.categories())
    )
)


Baleen corpus contains 24867 files in 12 categories
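
Because docs(), html(), paras(), sents(), and words() are all generators, the cell above only touches the file listing; none of the 24,867 documents is read into memory yet. A hedged sketch of peeking at a few items from the pipeline (the 'title' field is the one used in the fields() docstring above):

from itertools import islice

# Peek at a handful of titles without loading the whole corpus.
for title in islice(corpus.fields('title'), 5):
    print(title)

# Stream a few sentences from the first category only.
first_cat = corpus.categories()[0]
for sent in islice(corpus.sents(categories=[first_cat]), 3):
    print(sent)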

Preprocessing Corpora


In [35]:
class Preprocessor(object):
    """
    The preprocessor wraps a corpus object (usually a `BaleenCorpusReader`)
    and manages the stateful tokenization and part of speech tagging into a
    directory that is stored in a format that can be read by the
    `BaleenPickledCorpusReader`. This format is more compact and necessarily
    removes a variety of fields from the document that are stored in the JSON
    representation dumped from the Mongo database. This format, however, is
    more easily accessed for common parsing activities.
    """

    def __init__(self, corpus, target=None):
        """
        The corpus is the `BaleenCorpusReader` to preprocess and pickle.
        The target is the directory on disk to output the pickled corpus to.
        """
        self.corpus = corpus
        self.target = target

    @property
    def target(self):
        return self._target

    @target.setter
    def target(self, path):
        if path is not None:
            # Normalize the path and make it absolute
            path = os.path.expanduser(path)
            path = os.path.expandvars(path)
            path = os.path.abspath(path)

            if os.path.exists(path):
                if not os.path.isdir(path):
                    raise ValueError(
                        "Please supply a directory to write preprocessed data to."
                    )

        self._target = path

    def fileids(self, fileids=None, categories=None):
        """
        Helper function to access the fileids of the corpus.
        """
        fileids = self.corpus._resolve(fileids, categories)
        if fileids:
            return fileids
        return self.corpus.fileids()

    def abspath(self, fileid):
        """
        Returns the absolute path to the target fileid from the corpus fileid.
        """
        # Find the directory, relative from the corpus root.
        parent = os.path.relpath(
            os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root
        )

        # Compute the name parts to reconstruct
        basename  = os.path.basename(fileid)
        name, ext = os.path.splitext(basename)

        # Create the pickle file extension
        basename  = name + '.pickle'

        # Return the path to the file relative to the target.
        return os.path.normpath(os.path.join(self.target, parent, basename))

    def tokenize(self, fileid):
        """
        Segments, tokenizes, and tags a document in the corpus. Returns a
        generator of paragraphs, which are lists of sentences, which in turn
        are lists of part of speech tagged words.
        """
        for paragraph in self.corpus.paras(fileids=fileid):
            yield [
                nltk.pos_tag(nltk.wordpunct_tokenize(sent))
                for sent in nltk.sent_tokenize(paragraph)
            ]

    def process(self, fileid):
        """
        For a single file does the following preprocessing work:

            1. Checks the location on disk to make sure no errors occur.
            2. Gets all paragraphs for the given text.
            3. Segments the paragraphs with the sent_tokenizer.
            4. Tokenizes the sentences with the wordpunct_tokenizer.
            5. Tags the sentences using the default pos_tagger.
            6. Writes the document as a pickle to the target location.

        This method is called multiple times from the transform runner.
        """
        # Compute the outpath to write the file to.
        target = self.abspath(fileid)
        parent = os.path.dirname(target)

        # Make sure the directory exists
        if not os.path.exists(parent):
            os.makedirs(parent)

        # Make sure that the parent is a directory and not a file
        if not os.path.isdir(parent):
            raise ValueError(
                "Please supply a directory to write preprocessed data to."
            )

        # Ensure that we are not overwriting existing data
        if os.path.exists(target):
            raise ValueError(
                "Path at '{}' already exists!".format(target)
            )

        # Create a data structure for the pickle
        document = list(self.tokenize(fileid))

        # Open and serialize the pickle to disk
        with open(target, 'wb') as f:
            pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)

        # Clean up the document
        del document

        # Return the target fileid
        return target

    def transform(self, fileids=None, categories=None, target=None):
        """
        Transform the wrapped corpus, writing out the segmented, tokenized,
        and part of speech tagged corpus as a pickle to the target directory.

        This method will also copy any files that sit directly in the
        corpus.root directory (e.g. feeds.json), since they are not matched
        by corpus.fileids().
        """
        # Add the new target directory
        if target: self.target = target

        # Make the target directory if it doesn't already exist
        if not os.path.exists(self.target):
            os.makedirs(self.target)

        # First shutil.copy anything in the root directory.
        names = [
            name  for name in os.listdir(self.corpus.root)
            if not name.startswith('.')
        ]

        # Filter out directories and copy files
        for name in names:
            source = os.path.abspath(os.path.join(self.corpus.root, name))
            target = os.path.abspath(os.path.join(self.target, name))

            if os.path.isfile(source):
                shutil.copy(source, target)

        # Resolve the fileids to start processing
        fileids = self.fileids(fileids, categories)
        return map(self.process, fileids)

In [45]:
start = time.time()
transformer = Preprocessor(corpus, TAGGED)
docs = transformer.transform()
print(
    "Transformed {} docs in {:0.3f} seconds".format(
        len(list(docs)), time.time() - start
    )
)


Transformed 24867 docs in 1940.821 seconds
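
Each pickle written by the preprocessor holds a list of paragraphs, where each paragraph is a list of sentences and each sentence is a list of (token, tag) tuples. A minimal sketch of reading one back to check that structure (it walks the target directory rather than assuming any particular fileid):

# Find the first pickle produced by the transform above and inspect it.
for dirpath, dirnames, filenames in os.walk(TAGGED):
    names = [name for name in filenames if name.endswith('.pickle')]
    if names:
        with open(os.path.join(dirpath, names[0]), 'rb') as f:
            document = pickle.load(f)
        # document[0] is the first paragraph; document[0][0] is the first
        # part of speech tagged sentence within it.
        print(document[0][0][:10])
        break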